# Install BiocManager (the package installer for Bioconductor) when absent.
if (!requireNamespace("BiocManager", quietly = TRUE)) {
  install.packages("BiocManager")
}

# Packages required for this lab session.
pkgs <- c(
  "SingleCellExperiment", "ExperimentHub", "edgeR", "biomaRt",
  "DropletUtils", "scRNAseq", "scater", "scuttle", "scran", "scry",
  "BiocSingular", "scDblFinder", "Seurat", "PCAtools", "glmpca",
  "genefilter", "pheatmap", "tidyverse", "mclust", "ggplot2",
  "devtools", "SingleR"
)

# Install only those packages that are not installed yet.
missing_pkgs <- setdiff(pkgs, rownames(installed.packages()))
if (length(missing_pkgs) > 0) {
  BiocManager::install(missing_pkgs)
}

# harmony is installed from GitHub; force a fresh install with dependencies.
library(devtools)
install_github("immunogenomics/harmony",
               dependencies = TRUE,
               force = TRUE)
We here make use of the publication of Anna Cuomo et al. (last author Oliver Stegle), which we will refer to as the iPSC dataset. The paper that describes this dataset can be found using this link.
In the experiment, the authors harvested induced pluripotent stem cells (iPSCs) from 125 healthy human donors. These cells were used to study the endoderm differentiation process. In this process, iPSCs differentiate to endoderm cells, a process which takes approximately three days. As such, the authors cultured the iPSC cell lines and allowed for differentiation for three days. During the experiment, cells were harvested at four different time points: day0 (directly upon incubation), day1, day2 and day3. Knowing the process of endoderm differentiation, these time points should correspond with different cell types: day0 are (undifferentiated) iPSCs, day1 are mesendoderm cells, day2 are “intermediate” cells and day3 are fully differentiated endoderm cells.
This dataset was generated using the SMART-Seq2 scRNA-seq protocol.
The final goal of the experiment was to characterize population variation in the process of endoderm differentiation.
For this lab session, we will work with a subset of the data, i.e., the data for the first (alphabetically) 15 patients in the experiment. These are the data you already downloaded for lab session 2 using the belnet filesender link.
The original data (all 125 patients) can be downloaded from Zenodo. At the bottom of this web-page, we can download the files raw_counts.csv.zip and cell_metadata_cols.tsv and store these files locally. We do not recommend doing this during the lab session, to avoid overloading the system.
First we read in the count matrix:
# Load the SingleCellExperiment infrastructure and read in the pre-assembled
# SCE object for the 15-patient subset of the Cuomo et al. iPSC dataset.
library(SingleCellExperiment)
# NOTE(review): hard-coded absolute path to one author's machine -- adjust
# this path to wherever you stored sce_15_cuomo.rds locally.
sce <- readRDS("/Users/jg/Desktop/sce_15_cuomo.rds")
Exploration of the metadata is essential to get a better idea of what the experiment was about and how it was organized.
colData(sce)[1:5,1:10]
## DataFrame with 5 rows and 10 columns
## assigned auxDir cell_filter cell_name
## <integer> <character> <logical> <character>
## 21554_5#104 1 aux_info TRUE 21554_5#104
## 21554_5#110 1 aux_info TRUE 21554_5#110
## 21554_5#113 1 aux_info TRUE 21554_5#113
## 21554_5#117 1 aux_info TRUE 21554_5#117
## 21554_5#127 1 aux_info TRUE 21554_5#127
## compatible_fragment_ratio day donor expected_format
## <numeric> <character> <character> <character>
## 21554_5#104 0.999981 day2 dixh IU
## 21554_5#110 0.999964 day2 dixh IU
## 21554_5#113 0.999945 day2 dixh IU
## 21554_5#117 0.999916 day2 dixh IU
## 21554_5#127 0.999863 day2 dixh IU
## experiment frag_dist_length
## <character> <integer>
## 21554_5#104 expt_21 1001
## 21554_5#110 expt_21 1001
## 21554_5#113 expt_21 1001
## 21554_5#117 expt_21 1001
## 21554_5#127 expt_21 1001
colnames(colData(sce))
## [1] "assigned"
## [2] "auxDir"
## [3] "cell_filter"
## [4] "cell_name"
## [5] "compatible_fragment_ratio"
## [6] "day"
## [7] "donor"
## [8] "expected_format"
## [9] "experiment"
## [10] "frag_dist_length"
## [11] "gc_bias_correct"
## [12] "is_cell_control"
## [13] "is_cell_control_bulk"
## [14] "is_cell_control_control"
## [15] "library_types"
## [16] "libType"
## [17] "log10_total_counts"
## [18] "log10_total_counts_endogenous"
## [19] "log10_total_counts_ERCC"
## [20] "log10_total_counts_feature_control"
## [21] "log10_total_counts_MT"
## [22] "log10_total_features"
## [23] "log10_total_features_endogenous"
## [24] "log10_total_features_ERCC"
## [25] "log10_total_features_feature_control"
## [26] "log10_total_features_MT"
## [27] "mapping_type"
## [28] "mates1"
## [29] "mates2"
## [30] "n_alt_reads"
## [31] "n_total_reads"
## [32] "num_assigned_fragments"
## [33] "num_bias_bins"
## [34] "num_bootstraps"
## [35] "num_compatible_fragments"
## [36] "num_consistent_mappings"
## [37] "num_inconsistent_mappings"
## [38] "num_libraries"
## [39] "num_mapped"
## [40] "num_processed"
## [41] "num_targets"
## [42] "nvars_used"
## [43] "pct_counts_endogenous"
## [44] "pct_counts_ERCC"
## [45] "pct_counts_feature_control"
## [46] "pct_counts_MT"
## [47] "pct_counts_top_100_features"
## [48] "pct_counts_top_100_features_endogenous"
## [49] "pct_counts_top_100_features_feature_control"
## [50] "pct_counts_top_200_features"
## [51] "pct_counts_top_200_features_endogenous"
## [52] "pct_counts_top_50_features"
## [53] "pct_counts_top_50_features_endogenous"
## [54] "pct_counts_top_50_features_ERCC"
## [55] "pct_counts_top_50_features_feature_control"
## [56] "pct_counts_top_500_features"
## [57] "pct_counts_top_500_features_endogenous"
## [58] "percent_mapped"
## [59] "plate_id"
## [60] "plate_well_id"
## [61] "post_prob"
## [62] "public_name"
## [63] "read_files"
## [64] "salmon_version"
## [65] "samp_type"
## [66] "sample_id"
## [67] "seq_bias_correct"
## [68] "size_factor"
## [69] "start_time"
## [70] "strand_mapping_bias"
## [71] "total_counts"
## [72] "total_counts_endogenous"
## [73] "total_counts_ERCC"
## [74] "total_counts_feature_control"
## [75] "total_counts_MT"
## [76] "total_features"
## [77] "total_features_endogenous"
## [78] "total_features_ERCC"
## [79] "total_features_feature_control"
## [80] "total_features_MT"
## [81] "used_in_expt"
## [82] "well_id"
## [83] "well_type"
## [84] "donor_short_id"
## [85] "donor_long_id"
## [86] "pseudo"
## [87] "PC1_top100hvgs"
## [88] "PC1_top200hvgs"
## [89] "PC1_top500hvgs"
## [90] "PC1_top1000hvgs"
## [91] "PC1_top2000hvgs"
## [92] "princ_curve"
## [93] "princ_curve_scaled01"
As stated in the paper, cells were sampled on 4 time points. Each of these time points is expected to correspond with different cell types (day0 = iPSC, day1 = mesendoderm, day2 = intermediate and day3 = endoderm).
table(colData(sce)$day)
##
## day0 day1 day2 day3
## 876 987 1124 890
As stated in the paper, cells were harvested from 125 patients. Here, we are working on a subset with 15 patients. The number of cells harvested per patient (over all time points) ranges from 31 to 637.
length(table(colData(sce)$donor)) # number of donors
## [1] 15
range(table(colData(sce)$donor)) # cells per donor
## [1] 31 637
Below, we look at how many cells were harvested per patient and per time point.
table(colData(sce)$donor,colData(sce)$day)
##
## day0 day1 day2 day3
## aowh 88 100 93 95
## aoxv 68 58 96 71
## babz 28 0 41 0
## bezi 13 11 4 3
## bima 0 0 44 31
## bokz 159 200 164 114
## cicb 42 21 75 26
## ciwj 40 27 35 39
## cuhk 41 47 39 27
## datg 185 147 136 115
## dixh 0 46 73 84
## eesb 66 106 103 195
## eipl 99 189 198 57
## eiwy 25 18 10 25
## eoxi 22 17 13 8
We see that for many patients the data is complete, i.e. cells were sampled on all time points.
Practically, the cells were prepared in 28 batches. Since we here only look at a subset of the data, we see that only 14 of these batches are represented here.
length(table(colData(sce)$experiment))
## [1] 14
table(colData(sce)$experiment, colData(sce)$day)
##
## day0 day1 day2 day3
## expt_21 0 46 73 84
## expt_22 22 17 13 8
## expt_24 28 0 41 0
## expt_29 73 91 93 86
## expt_30 15 9 0 9
## expt_31 83 68 114 53
## expt_33 70 49 53 64
## expt_34 274 298 247 165
## expt_36 25 18 10 25
## expt_39 13 11 4 3
## expt_41 99 189 198 57
## expt_42 0 0 44 31
## expt_43 134 164 199 266
## expt_45 40 27 35 39
The rowData slot of a SingleCellExperiment object allows for storing information on the features, i.e. the genes, in a dataset. In our object, the rowData slot currently contains the following:
head(rowData(sce))
## DataFrame with 6 rows and 1 column
## V1
## <character>
## 1 ENSG00000000003_TSPAN6
## 2 ENSG00000000419_DPM1
## 3 ENSG00000000457_SCYL3
## 4 ENSG00000000460_C1or..
## 5 ENSG00000001036_FUCA2
## 6 ENSG00000001084_GCLC
To improve our gene-level information, we may:
Split V1 into two columns, one with the ENSEMBL ID and the other with the gene symbol.
Display which chromosome the gene is located
Many more options are possible, but are not necessary for us right now.
# Split the combined "ENSEMBL_SYMBOL" identifier in V1 into two columns:
# everything before the first underscore (Ensembl ID) and everything
# after it (gene symbol).
gene_ids <- rowData(sce)$V1
rowData(sce) <- data.frame(
  Ensembl = sub("_.*", "", gene_ids),
  Symbol  = sub("^[^_]*_", "", gene_ids)
)
head(rowData(sce))
## DataFrame with 6 rows and 2 columns
## Ensembl Symbol
## <character> <character>
## 1 ENSG00000000003 TSPAN6
## 2 ENSG00000000419 DPM1
## 3 ENSG00000000457 SCYL3
## 4 ENSG00000000460 C1orf112
## 5 ENSG00000001036 FUCA2
## 6 ENSG00000001084 GCLC
# NOTE(review): currently issues with the Ensembl server -> do not evaluate
# this chunk. Beware: the mitochondrial-gene flagging further below depends
# on the chromosome_name column that is only added here.
library("biomaRt")
# Connect to the Ensembl "genes" mart, archive version 75.
ensembl75 <- useEnsembl(biomart = 'genes',
dataset = 'hsapiens_gene_ensembl',
version = 75)
GeneInfo <- getBM(attributes = c("ensembl_gene_id", # To match with rownames SCE
"chromosome_name"), # Info on chromosome
mart = ensembl75)
# Reorder the biomaRt results to match the gene order of the SCE object.
GeneInfo <- GeneInfo[match(rowData(sce)$Ensembl, GeneInfo$ensembl_gene_id),]
rowData(sce) <- cbind(rowData(sce), GeneInfo)
head(rowData(sce))
# Sanity check: after matching, both ID columns must be identical.
all(rowData(sce)$Ensembl == rowData(sce)$ensembl_gene_id)
# identical, as desired, so we could optionally remove one of the two
Let us first try the very simple and very lenient filtering criterion that we adopted for the Macosko dataset.
# Very lenient gene filter: keep genes with a non-zero count in more than
# 10 cells.
keep <- rowSums(assays(sce)$counts > 0) > 10
table(keep)
## keep
## TRUE
## 11231
We see that this filtering strategy does not remove any genes for this dataset. In general, datasets from plate-based scRNA-seq protocols have a far higher sequencing depth than data from droplet-based protocols. As requiring a minimum expression of 1 count in at least 10 cells is a very lenient criterion if we consider that we have almost 4,000 cells, we should consider adopting a more stringent filtering criterion, like filterByExpr from edgeR:
library(edgeR)
table(colData(sce)$day)
##
## day0 day1 day2 day3
## 876 987 1124 890
# Stringent gene filter with edgeR::filterByExpr: require at least 5 counts
# in a sufficient number of cells, where "sufficient" is derived from the
# smallest group (here the sampling day) and min.prop = 0.4 requires
# expression in at least 40% of the cells of that group.
keep2 <- edgeR::filterByExpr(y=sce,
group = colData(sce)$day,
min.count = 5,
min.prop = 0.4)
table(keep2)
## keep2
## FALSE TRUE
## 857 10374
sce <- sce[keep2,]
library(scater)
## Loading required package: scuttle
## Loading required package: ggplot2
##
## Attaching package: 'scater'
## The following object is masked from 'package:limma':
##
## plotMDS
# check ERCC spike-in transcripts
sum(grepl("^ERCC-", rowData(sce)$Symbol)) # no spike-in transcripts available
## [1] 0
# Flag mitochondrial genes via the chromosome annotation.
# NOTE(review): chromosome_name is only added by the (skipped) biomaRt chunk
# above; when that chunk is not evaluated the column is NULL, grepl() then
# returns logical(0), and no genes are flagged -- hence sum(is.mito) prints
# 0 here rather than the 13 mitochondrial genes found when the annotation
# is present.
is.mito <- grepl("^MT", rowData(sce)$chromosome_name)
sum(is.mito) # 13 mitochondrial genes when the biomaRt chunk was run; 0 otherwise
## [1] 0
# Per-cell QC metrics: library size ("sum"), number of detected genes
# ("detected"), and summary statistics for the mitochondrial subset
# defined above.
df <- perCellQCMetrics(sce, subsets=list(Mito=is.mito))
head(df)
## DataFrame with 6 rows and 6 columns
## sum detected subsets_Mito_sum subsets_Mito_detected
## <numeric> <numeric> <numeric> <numeric>
## 21554_5#104 138676.3 5305 0 0
## 21554_5#110 685123.5 5927 0 0
## 21554_5#113 1671911.4 5613 0 0
## 21554_5#117 90419.4 6066 0 0
## 21554_5#127 59463.2 6549 0 0
## 21554_5#128 416482.7 7870 0 0
## subsets_Mito_percent total
## <numeric> <numeric>
## 21554_5#104 0 138676.3
## 21554_5#110 0 685123.5
## 21554_5#113 0 1671911.4
## 21554_5#117 0 90419.4
## 21554_5#127 0 59463.2
## 21554_5#128 0 416482.7
## add the QC variables to sce object
colData(sce) <- cbind(colData(sce), df)
In the figure below, we see that several cells have a very low number of expressed genes, and where most of the molecules are derived from mitochondrial genes. This indicates likely damaged cells, presumably because of loss of cytoplasmic RNA from perforated cells, so we should remove these for the downstream analysis.
# Number of genes vs library size
plotColData(sce, x = "sum", y="detected", colour_by="day")
# Mitochondrial genes
plotColData(sce, x = "detected", y="subsets_Mito_percent", colour_by="day")
Below, we remove cells that are outlying with respect to
We remove a total of \(244\) cells, mainly due to low sequencing depth and low number of genes detected.
# Adaptive (MAD-based) outlier detection, default 3 MADs from the median:
# cells with an outlyingly small library size (on the log scale) ...
lowLib <- isOutlier(df$sum, type="lower", log=TRUE)
# ... cells with outlyingly few detected genes (on the log scale) ...
lowFeatures <- isOutlier(df$detected, type="lower", log=TRUE)
# ... and cells with an outlyingly high mitochondrial percentage.
highMito <- isOutlier(df$subsets_Mito_percent, type="higher")
table(lowLib)
## lowLib
## FALSE TRUE
## 3676 201
table(lowFeatures)
## lowFeatures
## FALSE TRUE
## 3813 64
table(highMito)
## highMito
## FALSE
## 3877
# A cell is discarded when it fails any one of the three QC criteria.
discardCells <- (lowLib | lowFeatures | highMito)
table(discardCells)
## discardCells
## FALSE TRUE
## 3633 244
colData(sce)$discardCells <- discardCells
# visualize cells to be removed
plotColData(sce, x = "detected", y="subsets_Mito_percent", colour_by = "discardCells")
plotColData(sce, x = "sum", y="detected", colour_by="discardCells")
# visualize cells to be removed
plotColData(sce, x = "detected", y="subsets_Mito_percent", colour_by = "donor")
plotColData(sce, x = "sum", y="detected", colour_by="donor")
# visualize cells to be removed
plotColData(sce, x = "detected", y="subsets_Mito_percent", colour_by = "experiment")
plotColData(sce, x = "sum", y="detected", colour_by="experiment")
table(sce$donor, sce$discardCells)
##
## FALSE TRUE
## aowh 367 9
## aoxv 284 9
## babz 44 25
## bezi 30 1
## bima 73 2
## bokz 625 12
## cicb 161 3
## ciwj 135 6
## cuhk 147 7
## datg 569 14
## dixh 90 113
## eesb 452 18
## eipl 537 6
## eiwy 77 1
## eoxi 42 18
table(sce$donor, sce$discardCells)/rowSums(table(sce$donor, sce$discardCells))
##
## FALSE TRUE
## aowh 0.97606383 0.02393617
## aoxv 0.96928328 0.03071672
## babz 0.63768116 0.36231884
## bezi 0.96774194 0.03225806
## bima 0.97333333 0.02666667
## bokz 0.98116170 0.01883830
## cicb 0.98170732 0.01829268
## ciwj 0.95744681 0.04255319
## cuhk 0.95454545 0.04545455
## datg 0.97598628 0.02401372
## dixh 0.44334975 0.55665025
## eesb 0.96170213 0.03829787
## eipl 0.98895028 0.01104972
## eiwy 0.98717949 0.01282051
## eoxi 0.70000000 0.30000000
#fractions of removed cells per donor
Most removed cells (fraction) are from patients dixh and babz.
table(sce$experiment, sce$discardCells)
##
## FALSE TRUE
## expt_21 90 113
## expt_22 42 18
## expt_24 44 25
## expt_29 336 7
## expt_30 31 2
## expt_31 308 10
## expt_33 227 9
## expt_34 967 17
## expt_36 77 1
## expt_39 30 1
## expt_41 537 6
## expt_42 73 2
## expt_43 736 27
## expt_45 135 6
table(sce$experiment, sce$donor)
##
## aowh aoxv babz bezi bima bokz cicb ciwj cuhk datg dixh eesb eipl eiwy
## expt_21 0 0 0 0 0 0 0 0 0 0 203 0 0 0
## expt_22 0 0 0 0 0 0 0 0 0 0 0 0 0 0
## expt_24 0 0 69 0 0 0 0 0 0 0 0 0 0 0
## expt_29 343 0 0 0 0 0 0 0 0 0 0 0 0 0
## expt_30 33 0 0 0 0 0 0 0 0 0 0 0 0 0
## expt_31 0 0 0 0 0 0 164 0 154 0 0 0 0 0
## expt_33 0 0 0 0 0 0 0 0 0 236 0 0 0 0
## expt_34 0 0 0 0 0 637 0 0 0 347 0 0 0 0
## expt_36 0 0 0 0 0 0 0 0 0 0 0 0 0 78
## expt_39 0 0 0 31 0 0 0 0 0 0 0 0 0 0
## expt_41 0 0 0 0 0 0 0 0 0 0 0 0 543 0
## expt_42 0 0 0 0 75 0 0 0 0 0 0 0 0 0
## expt_43 0 293 0 0 0 0 0 0 0 0 0 470 0 0
## expt_45 0 0 0 0 0 0 0 141 0 0 0 0 0 0
##
## eoxi
## expt_21 0
## expt_22 60
## expt_24 0
## expt_29 0
## expt_30 0
## expt_31 0
## expt_33 0
## expt_34 0
## expt_36 0
## expt_39 0
## expt_41 0
## expt_42 0
## expt_43 0
## expt_45 0
Most removed cells (fraction) are from patients dixh and babz. Most low library sizes seem to come from patient dixh; for patient babz the effect is less pronounced.
plotColData(sce[,sce$donor=="dixh"], x = "sum", y="detected")
plotColData(sce[,sce$donor=="babz"], x = "sum", y="detected")
As such, we are mainly removing cells from specific patients and the respective batches in which they were sequenced. However, we want to be careful; we only want to remove technical artefacts, while retaining as much of the biology as possible. In our exploratory figure, we see that the cells we are removing based on the number of genes detected, are quite far apart from the bulk of the data cloud; as such, these cells are indeed suspicious. For the criterion of library size, we see that the cells removed there are still strongly connected to the data cloud. As such, we may want to relax the filtering criterion there a little bit. When we think of how the adaptive threshold strategy works, we may want to remove cells that are 4MADs away from the center, rather than the default 3 MADs.
# previously
lowLib <- isOutlier(df$sum, type="lower", log=TRUE)
table(lowLib)
## lowLib
## FALSE TRUE
## 3676 201
# after seeing appropriate exploratory figure: relax the library-size filter
# from the default 3 MADs to 4 MADs, since the flagged cells were still
# well connected to the bulk of the data cloud
lowLib <- isOutlier(df$sum, nmads=4, type="lower", log=TRUE)
table(lowLib)
## lowLib
## FALSE TRUE
## 3783 94
discardCells <- (lowLib | lowFeatures | highMito)
table(discardCells)
## discardCells
## FALSE TRUE
## 3731 146
colData(sce)$discardCells <- discardCells
Note that these steps are not exact; different analysts will come with different filtering criteria for many of the steps. The key ideas are that we let appropriate exploratory figures guide us to make reasonable choices; i.e., we look at the data rather than blindly following a standardized pipeline that may work well in many cases, but maybe not our particular dataset.
# remove cells identified using adaptive thresholds
sce <- sce[, !colData(sce)$discardCells]
For normalization, the size factors \(s_i\) computed here are simply scaled library sizes:
\[ N_i = \sum_g Y_{gi} \] \[ s_i = N_i / \bar{N}_i \]
# Library-size normalization: computes per-cell size factors and stores
# log-transformed normalized counts in a new "logcounts" assay.
sce <- logNormCounts(sce)
# note we also returned log counts: see the additional logcounts assay.
sce
## class: SingleCellExperiment
## dim: 10374 3731
## metadata(0):
## assays(2): counts logcounts
## rownames: NULL
## rowData names(2): Ensembl Symbol
## colnames(3731): 21554_5#128 21554_5#142 ... 24947_6#91 24947_6#98
## colData names(101): assigned auxDir ... discardCells sizeFactor
## reducedDimNames(0):
## mainExpName: NULL
## altExpNames(0):
# you can extract size factors using
sf <- librarySizeFactors(sce)
mean(sf) # equal to 1 due to scaling.
## [1] 1
# The size factors here are scaled library sizes, so plotting them against
# the log total counts per cell shows the expected monotone relationship.
plot(x= log(colSums(assays(sce)$counts)),
y=sf)
— end lab session 1 —
library(scran)
# Key the rows by Ensembl ID so downstream gene lists use stable identifiers.
rownames(sce) <- rowData(sce)$Ensembl
# Model the per-gene variance of the log-expression, decomposing it into a
# technical component (the mean-variance trend) and a biological component.
dec <- modelGeneVar(sce)
head(dec)
## DataFrame with 6 rows and 6 columns
## mean total tech bio p.value FDR
## <numeric> <numeric> <numeric> <numeric> <numeric> <numeric>
## ENSG00000000003 5.452309 0.863745 1.28581 -0.4220675 0.773732 0.882711
## ENSG00000000419 5.832487 1.028264 1.07766 -0.0493993 0.541773 0.882402
## ENSG00000000457 0.760778 1.175215 1.72646 -0.5512453 0.767517 0.882402
## ENSG00000000460 3.112458 1.545048 2.62346 -1.0784118 0.826574 0.888076
## ENSG00000001036 3.570987 2.180611 2.44778 -0.2671676 0.598622 0.882402
## ENSG00000001084 1.698901 2.386060 2.55583 -0.1697705 0.560411 0.882402
# Extract the fitted mean-variance trend for plotting.
fit <- metadata(dec)
plot(fit$mean, fit$var,
xlab="Mean of log-expression",
ylab="Variance of log-expression")
curve(fit$trend(x), col="dodgerblue", add=TRUE, lwd=2)
# get top 1000 highly variable genes (largest biological variance component)
hvg <- getTopHVGs(dec,
n=1000)
head(hvg)
## [1] "ENSG00000147869" "ENSG00000158815" "ENSG00000095596" "ENSG00000104371"
## [5] "ENSG00000185155" "ENSG00000120937"
# plot these: selected HVGs in green ("darkseagreen3"), all other genes in
# orange, on top of the fitted mean-variance trend
plot(fit$mean, fit$var,
col = c("orange", "darkseagreen3")[(names(fit$mean) %in% hvg)+1],
xlab="Mean of log-expression",
ylab="Variance of log-expression")
curve(fit$trend(x), col="dodgerblue", add=TRUE, lwd=2)
legend("topleft",
legend = c("Selected", "Not selected"),
col = c("darkseagreen3", "orange"),
pch = 16,
bty='n')
# Fix the seed for reproducibility (runPCA may use a randomized SVD).
set.seed(1234)
# PCA on the log-counts, restricted to the 1000 highly variable genes.
sce <- runPCA(sce,
ncomponents=30,
subset_row=hvg)
plotPCA(sce,
colour_by = "day")
PCA has been performed. The PCA information has been automatically stored in the reducedDim slot of the SingleCellExperiment object.
reducedDimNames(sce)
## [1] "PCA"
head(reducedDim(sce,
type="PCA"))
## PC1 PC2 PC3 PC4 PC5 PC6
## 21554_5#128 -27.364852 9.594060 11.373564 30.76916 15.394118 -27.80931
## 21554_5#142 -26.726070 8.421937 8.355765 32.85938 -6.317488 -23.51960
## 21554_5#174 -16.417046 16.397747 9.339535 29.60662 20.174990 -23.62026
## 21554_5#176 -4.164451 15.322549 23.551281 26.57112 31.728127 -19.74506
## 21554_5#181 -22.143156 7.574105 9.966680 36.27930 12.204297 -26.26973
## 21554_5#183 -15.698590 15.525954 -13.216317 33.72004 -2.447697 -30.98004
## PC7 PC8 PC9 PC10 PC11 PC12
## 21554_5#128 -9.9341013 1.1274408 1.9843292 2.089876 -6.675600 -2.5301641
## 21554_5#142 8.7769917 9.3930201 -15.5791940 -11.423582 -1.627703 -4.0035113
## 21554_5#174 10.2249020 -1.3204708 -6.4121141 1.720833 17.826228 9.1196366
## 21554_5#176 -0.6745669 -0.9984625 -9.3857501 -4.181246 22.207032 4.5883345
## 21554_5#181 -0.9321336 -0.2769346 4.6500493 2.622540 -6.233380 0.2515426
## 21554_5#183 7.3013430 6.9185899 -0.2620361 -4.045207 -3.049075 -2.0505240
## PC13 PC14 PC15 PC16 PC17 PC18
## 21554_5#128 -3.4087788 0.02235213 1.420843 0.7293154 1.641542 0.6151673
## 21554_5#142 -9.8428164 -8.27401019 -1.105121 -3.1555296 -3.133926 -1.3455544
## 21554_5#174 -5.6386833 -2.31652358 4.070325 1.2707437 5.098201 0.3038039
## 21554_5#176 0.2916532 -14.56508569 -6.339897 -2.6535029 5.029821 -0.7144418
## 21554_5#181 -4.9837082 8.00451503 4.048105 9.2870735 4.160989 1.2141390
## 21554_5#183 -2.9067374 1.20335452 5.120246 3.1371635 6.098355 -2.2775478
## PC19 PC20 PC21 PC22 PC23 PC24
## 21554_5#128 5.2011841 -6.907664 -6.2647843 3.606747 -1.7431652 -1.5744543
## 21554_5#142 -0.3342526 9.669928 -0.8871485 -4.424970 11.8442676 1.7455513
## 21554_5#174 0.3691554 2.859107 3.0277004 -2.735873 5.1592166 -5.9721144
## 21554_5#176 -5.3943048 -2.840958 2.5375172 1.622524 4.6667312 -5.6725527
## 21554_5#181 3.2186974 -3.446394 -0.1923339 -2.137403 -0.7645622 -0.3988127
## 21554_5#183 2.0889075 -2.666797 0.1332723 1.779886 3.6862353 2.0159151
## PC25 PC26 PC27 PC28 PC29 PC30
## 21554_5#128 -0.102636 0.7537463 -3.5420691 3.7249324 5.837253 -1.4452956
## 21554_5#142 -3.035680 1.2788784 4.0515276 1.7271103 -4.905858 -3.7472445
## 21554_5#174 -6.154168 -6.3600204 -0.2767746 4.4168734 7.188644 -3.2498909
## 21554_5#176 -3.992011 -4.7046345 -0.4531632 2.8712394 2.112673 -9.8468144
## 21554_5#181 2.443517 0.9264037 -0.4445118 4.1346673 -1.364736 2.1226600
## 21554_5#183 -2.209305 -3.0687859 0.1412868 0.4865902 2.936611 0.5889153
The plotPCA function of the scater package now allows us to visualize the cells in PCA space, based on the PCA information stored in our object:
plotPCA(sce,
colour_by = "day")
We see that for this dataset, PCA is able to distinguish between the different developmental stages quite well.
library(glmpca)
# GLM-PCA works on the raw counts of the HVGs directly (Poisson family),
# avoiding the log-transformation; stochastic minibatches speed up the fit,
# and the seed makes the stochastic optimization reproducible.
set.seed(211103)
poipca <- glmpca(Y = assays(sce)$counts[hvg,],
L = 2,
fam = "poi",
minibatch = "stochastic")
# Store the 2 latent factors as an extra reduced dimension in the SCE.
reducedDim(sce, "PoiPCA") <- poipca$factors
plotReducedDim(sce,
dimred="PoiPCA",
colour_by = "day")
We observe a similar reduced dimension plot as for the classical PCA approach, with reasonable separation between cells of different developmental stages.
# t-SNE computed on the stored PCA coordinates; the seed is fixed because
# t-SNE is stochastic.
set.seed(8778)
sce <- runTSNE(sce,
dimred = 'PCA',
external_neighbors=TRUE)
plotTSNE(sce,
colour_by = "day")
In this 2D t-SNE space, it is clear that cells of different developmental stages cluster separately. However, there appears to be some other level of heterogeneity in the data as well. We observe multiple clusters for the cells sampled at the same time point. In addition, while still clustering separately, some clusters of cells of different time points are still very close together.
We will explore this phenomenon in more detail later
@Koen: this UMAP looks different and worse from the one you show in the course slides. Which parameters did you use (or maybe upstream, #HVGs and so on)?
set.seed(65187)
# Recompute PCA on the HVGs (30 components) before running UMAP.
sce <- runPCA(sce,
ncomponents=30,
subset_row=hvg)
# NOTE(review): with dimred = 'PCA' supplied, the `pca` argument is likely
# ignored (it applies when UMAP is run on the full data matrix);
# `n_dimred = 12` is presumably what was intended to restrict UMAP to the
# first 12 PCs -- confirm against the scater::runUMAP documentation.
sce <- runUMAP(sce,
dimred = 'PCA',
pca = 12,
external_neighbors = TRUE)
plotUMAP(sce,
colour_by = "day")
While the visualization is less clear than the t-SNE figure above, we observe a similar pattern in this UMAP; cells of different developmental stages cluster separately, however, there seems to be an additional level of heterogeneity in the data.
— end lab session 2 —
In this experiment, our main interest is to study the endoderm differentiation process, i.e. the 4-day differentiation process of induced pluripotent stem cells (iPSCs) at day0, via mesendoderm cells (day1) and another intermediate stage (day2) to endoderm cells (day3).
However, we will need to account for the fact that the cells have been sampled from 15 different subjects, thus introducing additional biological heterogeneity. There are two variables in the colData of our SingleCellExperiment object that are useful for exploring this:
table(sce$donor,sce$experiment)
##
## expt_21 expt_22 expt_24 expt_29 expt_30 expt_31 expt_33 expt_34 expt_36
## aowh 0 0 0 342 32 0 0 0 0
## aoxv 0 0 0 0 0 0 0 0 0
## babz 0 0 54 0 0 0 0 0 0
## bezi 0 0 0 0 0 0 0 0 0
## bima 0 0 0 0 0 0 0 0 0
## bokz 0 0 0 0 0 0 0 635 0
## cicb 0 0 0 0 0 164 0 0 0
## ciwj 0 0 0 0 0 0 0 0 0
## cuhk 0 0 0 0 0 152 0 0 0
## datg 0 0 0 0 0 0 235 346 0
## dixh 109 0 0 0 0 0 0 0 0
## eesb 0 0 0 0 0 0 0 0 0
## eipl 0 0 0 0 0 0 0 0 0
## eiwy 0 0 0 0 0 0 0 0 78
## eoxi 0 47 0 0 0 0 0 0 0
##
## expt_39 expt_41 expt_42 expt_43 expt_45
## aowh 0 0 0 0 0
## aoxv 0 0 0 292 0
## babz 0 0 0 0 0
## bezi 30 0 0 0 0
## bima 0 0 73 0 0
## bokz 0 0 0 0 0
## cicb 0 0 0 0 0
## ciwj 0 0 0 0 137
## cuhk 0 0 0 0 0
## datg 0 0 0 0 0
## dixh 0 0 0 0 0
## eesb 0 0 0 466 0
## eipl 0 539 0 0 0
## eiwy 0 0 0 0 0
## eoxi 0 0 0 0 0
We have cells from 15 different patients and 14 different “experiments” (= sequencing batches).
We now will assess if this additional source of heterogeneity is also picked up in the reduced dimension plot
# time effect in PCA space, all time points
plotPCA(sce,
colour_by = "day")
# donor (nuisance) effect in PCA space, all time points
plotPCA(sce,
colour_by = "donor")
# experiment (nuisance) effect in PCA space, all time points
plotPCA(sce,
colour_by = "experiment")
We see that within a certain time point, cells of the same patient/experiment seem to cluster together at least to some extent. This effect becomes clearer when we visualize the data of the different time points separately.
# donor effect in PCA space, per time point
plotPCA(sce[,sce$day=="day0"],
colour_by = "donor")
plotPCA(sce[,sce$day=="day1"],
colour_by = "donor")
plotPCA(sce[,sce$day=="day2"],
colour_by = "donor")
plotPCA(sce[,sce$day=="day3"],
colour_by = "donor")
Analogously, we may inspect the effect of patient and experiment in the t-SNE plot we created earlier:
# time effect
plotTSNE(sce,
colour_by = "day")
# nuisance effects in t-SNE space, all time points
plotTSNE(sce,
colour_by = "donor")
plotTSNE(sce,
colour_by = "experiment")
As expected, we see that the additional heterogeneity observed in the clusters colored based on the time points can be explained by the patient effects.
In this experiment, the primary interest are the changes in gene expression across the four days, reflecting differentiation from induced pluripotent stem cells to endoderm cells. In contrast, the between-patient effects are not of interest here. Using batch correction, we will aim to “correct” for the donor effects, while retaining the main biological variation of interest!
We will explore two popular strategies for batch correction for scRNA-Seq data: Seurat CCA and Harmony.
TODO: add a brief (high-level, conceptual) description on CCA.
library(Seurat)
# Convert the SingleCellExperiment to a Seurat object; the assays and the
# reduced dimensions computed so far are carried over.
seurat_obj <- as.Seurat(sce)
seurat_obj
## An object of class Seurat
## 10374 features across 3731 samples within 1 assay
## Active assay: originalexp (10374 features, 0 variable features)
## 3 dimensional reductions calculated: PCA, PoiPCA, TSNE
@Koen: When you ran the Seurat CCA algorithm, it was on the Cuomo dataset without quality control. Here, after quality control, we have one group with 30 or fewer cells. As a consequence, the FindIntegrationAnchors function breaks. It can be rescued by setting the dims argument lower, however, then the IntegrateData function may break. This all has been well documented in the following GitHub issues:
The bottom line of the responses from the maintainers is that CCA should not be used with small batches, e.g., fewer than 30 or even 100 cells. They suggest either merging such small batches with other batches, or removing the small batches altogether. They actually seem to favor the latter, so that is what I will be doing here.
In the code chunk below, I remove the cells of patients that have 30 or fewer cells. If we do not do this, we will get issues downstream with the Seurat functions FindIntegrationAnchors and IntegrateData, which break down when the number of cells per batch is small. This is a known issue
and the package maintainers suggest to either remove or manually merge small batches together. We will here simply remove the cells of patient “bezi”;
table(seurat_obj$donor)
##
## aowh aoxv babz bezi bima bokz cicb ciwj cuhk datg dixh eesb eipl eiwy eoxi
## 374 292 54 30 73 635 164 137 152 581 109 466 539 78 47
table(seurat_obj$donor)[table(seurat_obj$donor) <= 30]
## bezi
## 30
seurat_obj <- seurat_obj[,-which(seurat_obj$donor == names(table(seurat_obj$donor)[table(seurat_obj$donor) <= 30]))]
After this, the SplitObject function is used to generate a list of Seurat objects, where each list element holds the data of one batch (patient):
seurat_obj.list <- SplitObject(seurat_obj, split.by = "donor")
nlevels(as.factor(sce$donor)) # originally 15 patients
## [1] 15
length(seurat_obj.list) # 14 patients left
## [1] 14
Next, Seurat will perform the following steps for batch correction:
NormalizeData: by default, takes the count assay of the Seurat object and performs a log-transformation, resulting in an additional log-transformed assay. This is performed for each batch separately.
FindVariableFeatures: Feature selection, using the variance-stabilizing transformation (VST) from Seurat, which amounts to calculating Pearson residuals from a regularized negative binomial regression model, with sequencing depth as a covariate. This is performed for each batch separately.
SelectIntegrationFeatures: Choose the features to use when integrating multiple datasets or batches. This function ranks features by the number of batches they are deemed variable in, breaking ties by the median variable feature rank across datasets. It returns the top scoring features by this ranking, which are called “anchors”.
IntegrateData: Performs dataset integration using a pre-computed set of “anchors” as obtained with SelectIntegrationFeatures.
# Normalize and identify variable features for each dataset (patient) independently
seurat_obj.list <- lapply(X = seurat_obj.list, FUN = function(x) {
x <- NormalizeData(x,verbose = FALSE)
x <- FindVariableFeatures(x,
selection.method = "vst",
nfeatures = 1000,
verbose = FALSE)
})
# Select features that are repeatedly variable across datasets for integration
features <- SelectIntegrationFeatures(object.list = seurat_obj.list)
# Find integration "anchors" (corresponding cells across batches) based on
# the selected features; these anchors drive the batch correction.
anchors <- FindIntegrationAnchors(object.list = seurat_obj.list,
anchor.features = features,
verbose = FALSE)
# This command creates an 'integrated' data assay. We have set the `k.weight`
# argument, which specifies the number of neighbors to consider when weighting
# anchors, to 30 (default is 100). This was necessary to avoid errors, since we
# have many batches with less than 100 cells.
data.combined <- IntegrateData(anchorset = anchors,
k.weight = 30,
verbose=FALSE)
data.combined
## An object of class Seurat
## 12374 features across 3701 samples within 2 assays
## Active assay: integrated (2000 features, 2000 variable features)
## 1 other assay present: originalexp
Now, we have a new Seurat object, which again is a single object (not a list) storing the expression values of all cells after batch correction. Note that the original expression values are still present in the originalexp assay of the object.
@Koen: I was thinking of not using the chunk below, i.e. to only perform a t-SNE and to not perform clustering on the Seurat batch corrected data. I would only perform clustering on the Harmony normalized data. We can discuss this, we could include Seurat as well for comparison, but maybe that would bring us too far.
# NOTE(review): this chunk requires the reduction "PCA_SeuBatch", which is
# only created by the RunPCA call further down in this document — run that
# chunk first. BUG FIX: the original RunUMAP call asked for a reduction named
# "pca", which is never created here (RunPCA stores its result under
# "PCA_SeuBatch"), so the call would fail; we now use "PCA_SeuBatch"
# consistently, matching the FindNeighbors call below.
data.combined <- RunUMAP(object = data.combined,
                         reduction = "PCA_SeuBatch",
                         dims = 1:12,
                         min.dist = 0.4,
                         n.neighbors = 15,
                         verbose = FALSE)
# Build a shared nearest-neighbor graph on the same 12 PCs...
data.combined <- FindNeighbors(object = data.combined,
                               reduction = "PCA_SeuBatch",
                               dims = 1:12,
                               verbose = FALSE)
# ...and cluster it (Louvain by default) at resolution 0.5.
data.combined <- FindClusters(object = data.combined,
                              resolution = 0.5,
                              verbose = FALSE)
Finally, we may use Seurat functions for performing dimension reduction and visualization of the batch corrected data.
# Standard Seurat workflow on the integrated assay: scale, PCA, t-SNE, plot.
# Step 1: center and scale each feature (a prerequisite for PCA).
data.combined <- ScaleData(object = data.combined, verbose = FALSE)
# Step 2: PCA keeping 30 components, stored under the name "PCA_SeuBatch".
data.combined <- RunPCA(object = data.combined,
                        npcs = 30,
                        reduction.name = "PCA_SeuBatch",
                        verbose = FALSE)
# Step 3: t-SNE on the first 12 principal components of the corrected PCA.
data.combined <- RunTSNE(object = data.combined,
                         reduction = "PCA_SeuBatch",
                         reduction.name = "tSNE_SeuBatch",
                         dims = 1:12)
# Visualize the t-SNE, colored by time point and by donor, side by side.
p1 <- DimPlot(object = data.combined,
              reduction = "tSNE_SeuBatch",
              group.by = "day")
p2 <- DimPlot(object = data.combined,
              reduction = "tSNE_SeuBatch",
              group.by = "donor")
p1 + p2
Alternatively, we can transform the Seurat object back to a SingleCellExperiment object and generate the visualizations using the Bioconductor functions that we used previously:
# Convert the Seurat object back to a SingleCellExperiment so that the
# Bioconductor plotting helpers can be used.
sce_intSeurat <- as.SingleCellExperiment(data.combined)
# t-SNE of the uncorrected data...
p1 <- plotTSNE(sce, colour_by = "day") + ggtitle("No batch")
p2 <- plotTSNE(sce, colour_by = "donor") + ggtitle("No batch")
# ...and of the Seurat batch-corrected data.
p3 <- plotReducedDim(sce_intSeurat,
                     dimred = "TSNE_SEUBATCH",
                     colour_by = "day") + ggtitle("Batch corrected")
p4 <- plotReducedDim(sce_intSeurat,
                     dimred = "TSNE_SEUBATCH",
                     colour_by = "donor") + ggtitle("Batch corrected")
# Compare uncorrected vs. corrected, by day and by donor.
p1 + p3
p2 + p4
The batch correction seems to have worked very well. Both with and without batch correction, we observe that cells of the same time point cluster together. But when no batch correction is performed, there seems to be an additional level of variability in the data.
When we color the cells based on the donor variable, we see that without batch correction cells of the same donor cluster together. After batch correction, such donor effects seem to be no longer present.
Also note that while batch correction removes the between-donor variability, the between-day variability that is of interest is still preserved. As such, based on our visualizations, it looks like the batch correction did not overcorrect and succeeded in removing only the unwanted variation in the data.
TODO: add a brief (high-level, conceptual) description on Harmony.
Performing batch correction with harmony is very simple; we only need a single function call and can directly work with our SingleCellExperiment object.
The function RunHarmony takes the following inputs:
object: name of the SingleCellExperiment object
group.by.vars: the names of the variables for which we want to perform batch correction. Conveniently, this can be multiple variables, so we can correct both for donor and experiment effects (since the correspondence between the two was not perfect)
reduction: Name of the previously computed reduced dimension space on which batch correction will be performed (not on raw data to improve signal-to-noise ratio). Typically PCA is chosen
reduction.save: name to store the batch corrected dimension reduced space
verbose: print progress or not.
library(harmony)
## Loading required package: Rcpp
# Fix the RNG seed for reproducibility, then run Harmony on the PCA
# coordinates, correcting simultaneously for donor and experiment. The
# corrected embedding is stored as "HARMONY_donor_experiment".
set.seed(684864)
sce <- harmony::RunHarmony(object = sce,
                           group.by.vars = c("donor", "experiment"),
                           reduction = "PCA",
                           reduction.save = "HARMONY_donor_experiment",
                           verbose = FALSE)
The output is an additional element in the reduced dimension space:
# Peek at the first 5 cells x 2 components of the uncorrected PCA embedding.
reducedDim(sce, type = "PCA")[seq_len(5), seq_len(2)]
## PC1 PC2
## 21554_5#128 -27.364852 9.594060
## 21554_5#142 -26.726070 8.421937
## 21554_5#174 -16.417046 16.397747
## 21554_5#176 -4.164451 15.322549
## 21554_5#181 -22.143156 7.574105
# Same cells in the Harmony-corrected embedding: the coordinates have shifted.
reducedDim(sce, type = "HARMONY_donor_experiment")[seq_len(5), seq_len(2)]
## HARMONY_donor_experiment_1 HARMONY_donor_experiment_2
## 21554_5#128 -25.018798 9.814387
## 21554_5#142 -24.132245 5.674046
## 21554_5#174 -15.195094 15.285123
## 21554_5#176 8.976033 6.388050
## 21554_5#181 -19.884244 7.579687
for which the values differ as compared to the original PCA coordinates. This is a consequence of the batch correction.
# Uncorrected PCA vs. Harmony-corrected embedding, both colored by day.
plotReducedDim(object = sce, dimred = "PCA", colour_by = "day")
plotReducedDim(object = sce,
               dimred = "HARMONY_donor_experiment",
               colour_by = "day")
When coloring on day, both plots look very similar.
# Same comparison, now colored by donor.
plotReducedDim(object = sce, dimred = "PCA", colour_by = "donor")
plotReducedDim(object = sce,
               dimred = "HARMONY_donor_experiment",
               colour_by = "donor")
This figure is too cluttered to see the donor effects. Below, we therefore generate a separate figure for each timepoint.
# Without batch correction: PCA colored by donor, one panel per time point.
p1 <- plotReducedDim(object = sce, dimred = "PCA", colour_by = "donor")
p1 + facet_wrap(~sce$day, ncol = 1)
Especially for time points day0 and day1, we observe clear donor effects.
# After Harmony batch correction: same per-time-point view.
p2 <- plotReducedDim(object = sce,
                     dimred = "HARMONY_donor_experiment",
                     colour_by = "donor")
p2 + facet_wrap(~sce$day, ncol = 1)
After batch correction with harmony, the donor effects are largely removed.
When using t-SNE visualization, the discrepancy between the uncorrected and batch corrected data becomes even clearer:
# Compute a t-SNE on the Harmony-corrected coordinates.
sce <- runTSNE(sce,
               dimred = 'HARMONY_donor_experiment',
               external_neighbors = TRUE,
               name = "TSNE_HARMONY_donor_experiment")
# Uncorrected vs. batch-corrected t-SNE, colored by day...
p1 <- plotReducedDim(sce, dimred = "TSNE", colour_by = "day")
p2 <- plotReducedDim(sce,
                     dimred = "TSNE_HARMONY_donor_experiment",
                     colour_by = "day")
p1 + p2
# ...colored by donor...
p3 <- plotReducedDim(sce, dimred = "TSNE", colour_by = "donor")
p4 <- plotReducedDim(sce,
                     dimred = "TSNE_HARMONY_donor_experiment",
                     colour_by = "donor")
p3 + p4
# ...and colored by experiment.
p5 <- plotReducedDim(sce, dimred = "TSNE", colour_by = "experiment")
p6 <- plotReducedDim(sce,
                     dimred = "TSNE_HARMONY_donor_experiment",
                     colour_by = "experiment")
p5 + p6
# Save the processed object relative to the working directory.
# FIX(review): the original wrote to the machine-specific absolute path
# "/Users/jg/Desktop/sce_after_batch.rds", which breaks for anyone else
# running this lab material.
saveRDS(sce, file = "sce_after_batch.rds")
In the second lab session, we discussed graph-based clustering, k-means clustering and hierarchical clustering. Here, we will only perform hierarchical clustering.
We may split the hierarchical clustering process in two intuitive steps:
Compute the pairwise distances between all cells. These are by default euclidean distances and, in order to reduce data complexity and increase signal to noise, we may perform this on the top (30) PC’s. Implemented in the dist function.
This function performs a hierarchical cluster analysis on the distances from step 1. Initially, each cell is assigned to its own cluster and then the algorithm proceeds iteratively, at each stage joining the two most similar clusters, continuing until there is just a single cluster. Implemented in the hclust function.
Note that the hclust function allows for specifying a “method” argument. The differences between the different methods goes beyond the scope of this session, but a brief description is provided in the function help file. In the context of scRNA-seq, I have mostly seen the use of the “ward.D2” method.
# Step 1: pairwise Euclidean distances between cells, computed in the
# Harmony-corrected reduced-dimension space.
distsce <- dist(reducedDim(sce, "HARMONY_donor_experiment"))
# Step 2: agglomerative hierarchical clustering with Ward's (D2) linkage.
hcl <- hclust(distsce, method = "ward.D2")
# Plot the dendrogram; per-cell labels would be unreadable, so hide them.
plot(hcl, labels = FALSE)
Next, we need to “cut the tree”, i.e., choose at which resolution we want to report the (cell-type) clusters. This can be achieved with the cutree function. As an input, cutree takes the dendrogram from the hclust function and a threshold value for cutting the tree. This is either k, the number of clusters we want to report, or h, the height in the dendrogram at which we want to cut the tree.
Here we choose k = 4, since we know in advance that we expect four clusters of cells (four time points).
# Cut the dendrogram into k = 4 clusters (one per expected time point) and
# tabulate the resulting cluster sizes.
clust_hcl_k4 <- cutree(hcl, k = 4)
table(clust_hcl_k4)
## clust_hcl_k4
## 1 2 3 4
## 575 1233 1061 862
Next, we visualize the data in PCA space colored based on time point and based on our inferred cluster labels.
# Store the cluster labels as a factor in the colData, then compare the
# inferred clusters against the known time points in the Harmony embedding.
sce$clust_hcl_k4 <- as.factor(clust_hcl_k4)
plotReducedDim(sce,
               dimred = "HARMONY_donor_experiment",
               colour_by = "clust_hcl_k4")
plotReducedDim(sce,
               dimred = "HARMONY_donor_experiment",
               colour_by = "day")
Wikipedia provides a decent high-level description of this trajectory inference:
“Trajectory inference or pseudotemporal ordering is a computational technique used in single-cell transcriptomics to determine the pattern of a dynamic process experienced by cells and then arrange cells based on their progression through the process. […] Trajectory inference seeks to characterize [such] differences by placing cells along a continuous path that represents the evolution of the process rather than dividing cells into discrete clusters. In some methods this is done by projecting cells onto an axis called pseudotime which represents the progression through the process.”
Here, we will use slingshot to create a trajectory for the Cuomo dataset.
library(slingshot)
## Loading required package: princurve
## Loading required package: TrajectoryUtils
##
## Attaching package: 'TrajectoryUtils'
## The following object is masked from 'package:scran':
##
## createClusterMST
# Fit a slingshot trajectory on the Harmony-corrected coordinates, using the
# hierarchical clusters as nodes and forcing the lineage to start in cluster
# "2" and end in cluster "3".
sce <- slingshot(sce,
                 start.clus = "2",
                 end.clus = "3",
                 clusterLabels = "clust_hcl_k4",
                 reducedDim = "HARMONY_donor_experiment")
# Cells colored by inferred cluster, with the lineage tree overlaid.
plot(reducedDims(sce)$HARMONY_donor_experiment[, c(1, 2)],
     col = as.factor(sce$clust_hcl_k4),
     pch = 16,
     asp = 1)
lines(SlingshotDataSet(sce), lwd = 2, type = 'lineages', col = 'black')
# Same plot, now colored by the known time point.
plot(reducedDims(sce)$HARMONY_donor_experiment,
     col = as.factor(sce$day),
     pch = 16,
     asp = 1)
lines(SlingshotDataSet(sce), lwd = 2, type = 'lineages', col = 'black')
library(tradeSeq)
### Find knots
# We first need to decide on the number of knots for the GAM smoothers,
# using the `evaluateK` function. This is slow (~9 min on a laptop).
# BUG FIX(review): the original passed `sds = sling$slingshot`, but no object
# named `sling` exists in this document — the fitted trajectory lives inside
# `sce` and is retrieved with SlingshotDataSet(). Also: `verbose = T`
# replaced by `verbose = TRUE` (T is a reassignable variable, not a keyword).
set.seed(5)
icMat <- evaluateK(counts = assays(sce)$counts,
                   sds = SlingshotDataSet(sce),
                   k = 3:10,
                   nGenes = 500,
                   verbose = TRUE)
# Reproducibly sample 1,000 genes to keep fitGAM tractable
# (±2min30 for 1000 genes vs. ~20 min for all genes).
set.seed(7)
subset_genes <- sample(rownames(sce), 1000, replace = FALSE)
# Marker genes highlighted in the paper...
markers <- c("ENSG00000111704", "ENSG00000164458", "ENSG00000141448")
# ...which we append if the random draw missed them.
subset_genes <- c(subset_genes, markers[!markers %in% subset_genes])
# Extract pseudotime and lineage weights from the slingshot fit, then fit a
# GAM per gene along pseudotime with 6 knots.
pseudotime <- slingPseudotime(sce, na = FALSE)
cellWeights <- slingCurveWeights(sce)
sce_fit <- fitGAM(counts = assays(sce)$counts[subset_genes, ],
                  pseudotime = pseudotime,
                  cellWeights = cellWeights,
                  nknots = 6,
                  verbose = TRUE)
# Check how many of the per-gene GAM fits converged.
table(rowData(sce_fit)$tradeSeq$converged)
##
## TRUE
## 1003
# Test, per gene, whether expression is associated with pseudotime (±20 sec).
assoRes <- associationTest(sce_fit)
head(assoRes)
## waldStat df pvalue meanLogFC
## ENSG00000203879 975.46041 10 0.000000e+00 0.3006228
## ENSG00000169567 387.14487 10 0.000000e+00 0.3233008
## ENSG00000135926 96.03573 10 3.330669e-16 0.4028965
## ENSG00000113645 154.30228 10 0.000000e+00 1.0592169
## ENSG00000151612 21.64792 10 1.700223e-02 0.2511431
## ENSG00000100325 89.05684 10 8.215650e-15 0.2003330
# Fraction of tested genes significantly associated with pseudotime after
# Benjamini-Hochberg correction at the 5% FDR level.
# FIX(review): `na.rm=T` -> `na.rm = TRUE` (T is reassignable, not a keyword).
sum(p.adjust(assoRes$pvalue, method = "BH") < 0.05, na.rm = TRUE) / nrow(assoRes)
## [1] 0.7268195
# @Koen ±73% significant (0.727 in the output above, not ±90%) (?)
# Test for differential expression between the start and end of the lineage,
# then plot the smoothers of the five genes with the largest Wald statistics.
startRes <- startVsEndTest(sce_fit)
oStart <- order(startRes$waldStat, decreasing = TRUE)
for (i in 1:5) {
  sigGeneStart <- oStart[i] # i-th top gene in the start vs. end test
  # BUG FIX(review): the index refers to rows of sce_fit (the ~1,000 fitted
  # genes), not of the full sce object, so the plot title must use
  # rownames(sce_fit); the original used rownames(sce) and displayed the
  # wrong gene IDs.
  print(plotSmoothers(sce_fit,
                      assays(sce_fit)$counts,
                      gene = sigGeneStart) +
          ggtitle(rownames(sce_fit)[sigGeneStart]))
}
In the Cuomo paper, the authors highlighted the following genes:
# Smoothed expression along pseudotime for the three marker genes
# highlighted in the Cuomo et al. paper.
plotSmoothers(sce_fit, assays(sce_fit)$counts,
              gene = which(rownames(sce_fit) == "ENSG00000111704"))
plotSmoothers(sce_fit, assays(sce_fit)$counts,
              gene = which(rownames(sce_fit) == "ENSG00000164458"))
plotSmoothers(sce_fit, assays(sce_fit)$counts,
              gene = which(rownames(sce_fit) == "ENSG00000141448"))
A very nice correspondence with the results presented in the paper!!
# Corresponding figures from the original publication, for comparison.
knitr::include_graphics("./../cuomo_traj1.jpeg")
knitr::include_graphics("./../cuomo_traj2.jpeg")
knitr::include_graphics("./../cuomo_traj3.jpeg")
# Marker-gene expression overlaid on the reduced-dimension embedding.
# FIX(review): plotGeneCount expects the trajectory object itself; the
# lineage plots above retrieve it with SlingshotDataSet(sce), whereas the
# original passed `sce$slingshot` (a colData column), which is not the
# trajectory object the function's `curve` argument expects.
plotGeneCount(SlingshotDataSet(sce),
              assays(sce_fit)$counts,
              gene = which(rownames(sce_fit) == "ENSG00000111704"))
plotGeneCount(SlingshotDataSet(sce),
              assays(sce_fit)$counts,
              gene = which(rownames(sce_fit) == "ENSG00000164458"))
plotGeneCount(SlingshotDataSet(sce),
              assays(sce_fit)$counts,
              gene = which(rownames(sce_fit) == "ENSG00000141448"))